Setup

# set up themes and notebook-wide settings
my.alpha <- 0.15  # ribbon transparency used throughout the plots
colors <- c("#2185c5ff","#ff9715ff","#f20253ff","#7ecefdff","#1c3aa9ff")
#colors <- c("#264653","#2a9d8f","#e9c46a","#f4a261","#e76f51")
# set friend/rivalry colors
fr.colors <- c(colors[2], colors[3])
# standard error of the mean (assumes complete, non-empty input; no NA handling)
se <- function(x) sqrt(var(x)/length(x))
theme_set(theme_classic())

# read in data (paths relative to the project root)
df.clean <- read.csv("./Paper-Analysis/data/df-clean.csv")
df.ling <- read.csv("./Paper-Analysis/data/df-ling.csv")
similarity <- read.csv("./Paper-Analysis/data/Similarity-data.csv")
transcript <- read.csv("./Paper-Analysis/data/survivor-split-sentences.csv")

# print number of sentences/items of dialogue
# remove dialogue spoken by host
transcript <- subset(transcript, Speaker != "Probst")
numObs <- nrow(transcript)
# note: paste0() takes no `sep` argument, so the previous `sep = ""` was
# silently pasted into the string as an empty element; dropped here
print(paste0("Number of sentences of dialogue = ", numObs))
## [1] "Number of sentences of dialogue = 486"

Linguistic Feature Models (F/R)

Similarity

Note: I’m not mean-centering similarity because similarity scores correspond to correlations. A similarity score of 0 is meaningful, and I won’t have negative similarity scores (they come from the USE similarity calculation).

See the Universal Sentence Encoder documentation for more information (hyperlink lost in rendering).

Format data

# get mean % time chosen per clip, averaged across participants
all_choices <- df.clean %>%
  select(Target, ChoiceOption, ClipNumber, PID, condType, percent.chosen)

# add "Speaker" and "Recipient" columns
all_choices$Speaker <- all_choices$Target
all_choices$Recipient <- all_choices$ChoiceOption

# add dyad pair column: pmin/pmax sorts the two names within each pair,
# so A-B and B-A map onto the same dyad label.
# (dropped the redundant data.frame() re-wrap — all_choices is already a
# data.frame — and the `sep = ""` argument, which paste0() does not accept)
all_choices <- all_choices %>%
  mutate(dyad = paste0(pmin(Target, ChoiceOption), pmax(Target, ChoiceOption)))

# join choices and similarity scores
# NOTE(review): no `by` argument — inner_join() matches on all shared column
# names; confirm these are the intended join keys
overall_sim <- inner_join(all_choices, similarity)
overall_sim <- overall_sim %>%
  filter(recent.similarity < .99) # remove high similarity between Alicia/Tom

# save
write.csv(overall_sim, "/Volumes/GoogleDrive/My Drive/SANLab/Experiments/Survivor-Language/Analysis/Paper-Analysis/data/similarity_choices.csv")

Look at simple slopes for when condType is friend and rival. This indicates the relationship between similarity and % of time chosen when in a friend block or a rival block. I am testing if that value is significantly different from zero. Similarity is the moderator!

# only include friend and rival judgments
friend.rival <- subset(overall_sim, condType != "Win")

# make condType a factor
friend.rival$condType <- as.factor(friend.rival$condType)

# sanity-check model: Friend trials only, similarity as the sole fixed
# effect, with random intercepts for dyad and participant (PID)
test <- subset(overall_sim, condType == "Friend")
test.model <- lmer(percent.chosen ~ recent.similarity + (1|dyad) + (1|PID), data = test)
summary(test.model)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: percent.chosen ~ recent.similarity + (1 | dyad) + (1 | PID)
##    Data: test
## 
## REML criterion at convergence: 1314.4
## 
## Scaled residuals: 
##         Min          1Q      Median          3Q         Max 
## -2.97484932 -0.74153453  0.00251471  0.72794715  2.64913409 
## 
## Random effects:
##  Groups   Name        Variance   Std.Dev. 
##  PID      (Intercept) 0.00000000 0.0000000
##  dyad     (Intercept) 0.02205204 0.1484993
##  Residual             0.07563844 0.2750244
## Number of obs: 4756, groups:  PID, 57; dyad, 21
## 
## Fixed effects:
##                       Estimate   Std. Error           df  t value   Pr(>|t|)
## (Intercept)       4.917640e-01 3.392059e-02 2.322907e+01 14.49750 3.9484e-13
## recent.similarity 1.995255e-02 3.112617e-02 4.751387e+03  0.64102    0.52154
##                      
## (Intercept)       ***
## recent.similarity    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## rcnt.smlrty -0.271
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# run multilevel model
# the interaction tests whether the similarity slope differs between Friend
# and Rival blocks (Friend is the reference level — R's alphabetical default)
model.similarity <- lmer(percent.chosen ~ recent.similarity * condType + (1|dyad) + (1|PID),
                         data = friend.rival)
summary(model.similarity)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: percent.chosen ~ recent.similarity * condType + (1 | dyad) +  
##     (1 | PID)
##    Data: friend.rival
## 
## REML criterion at convergence: 4179.2
## 
## Scaled residuals: 
##         Min          1Q      Median          3Q         Max 
## -1.99808368 -0.84534351 -0.00015929  0.83881292  1.99809357 
## 
## Random effects:
##  Groups   Name        Variance     Std.Dev.    
##  PID      (Intercept) 2.233851e-17 4.726364e-09
##  dyad     (Intercept) 1.053759e-03 3.246166e-02
##  Residual             9.032085e-02 3.005343e-01
## Number of obs: 9500, groups:  PID, 57; dyad, 21
## 
## Fixed effects:
##                                      Estimate    Std. Error            df
## (Intercept)                        0.42836362    0.01239501  120.98664774
## recent.similarity                  0.23157616    0.03106597 6768.85928530
## condTypeRival                      0.11199200    0.01374135 9487.52639717
## recent.similarity:condTypeRival   -0.36064578    0.04123854 9489.71529974
##                                  t value   Pr(>|t|)    
## (Intercept)                     34.55936 < 2.22e-16 ***
## recent.similarity                7.45433 1.0158e-13 ***
## condTypeRival                    8.15000 4.0999e-16 ***
## recent.similarity:condTypeRival -8.74536 < 2.22e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) rcnt.s cndTyR
## rcnt.smlrty -0.741              
## condTypeRvl -0.549  0.590       
## rcnt.sml:TR  0.493 -0.665 -0.894
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# get confidence intervals (profile CIs; .sig01/.sig02 are random-effect SDs)
confint(model.similarity)
##                                          2.5 %          97.5 %
## .sig01                           0.00000000000  0.006407779448
## .sig02                           0.02207366087  0.046190218114
## .sigma                           0.29626039592  0.304816232076
## (Intercept)                      0.40415674895  0.452630021553
## recent.similarity                0.17087502792  0.292660340659
## condTypeRival                    0.08506712275  0.138928447270
## recent.similarity:condTypeRival -0.44148619502 -0.279846035146
# get simple slopes: similarity slope within each condType level
simple_slopes(model.similarity)
# plot simple slopes
graph_model(model.similarity, y=percent.chosen, x=condType, lines=recent.similarity, colors = fr.colors)

Plot!

# use ggpredict to get marginal means / predicted values
similarity.df <- ggpredict(model.similarity, terms = c("recent.similarity","condType"), ci.lvl = 0.95)
similarity.df <- as.data.frame(similarity.df)

# get point information for plot (mean similarity and mean time chosen per dyad, per clip)
# each point represents average semantic similarity between each contestant pair for each clip
# (summarize() leaves the result grouped by dyad/ClipNumber; harmless here
# since sim is only used as a point layer)
sim <- friend.rival %>%
  group_by(dyad, ClipNumber, condType) %>%
  summarize(mean.similarity = mean(recent.similarity),
            mean.TimeChosen = mean(percent.chosen))

# plot!
ggplot(data = similarity.df, aes(y = predicted, x = x, color = group, fill = group)) +
  geom_point(data = sim, 
             aes(x = mean.similarity, y = mean.TimeChosen, color = condType, fill = condType), 
             alpha = 0.5, shape = 16) +
  geom_line(size = 1.5) +
  ylim(0, 1) +
  # NOTE(review): linetype = .5 is not a standard linetype value (expected an
  # integer 0-6 or a name) — confirm intended ribbon border style
  geom_ribbon(alpha = my.alpha,
              aes(ymin = conf.low, ymax = conf.high),
              linetype = .5) +
  ylab("% time chosen") +
  xlab("dyadic-level semantic similarity") +
  theme_classic() +
  scale_color_manual(values = fr.colors, name = "Group") +
  scale_fill_manual(values = fr.colors, name = "Group") +
  theme(panel.background = element_blank(),
        axis.title.x = element_text(vjust = -0.4, size = 16),
        axis.title.y = element_text(vjust = 1.5, size = 16), 
        axis.text = element_text(size = 12),
        legend.text = element_text(size = 10),
        legend.title = element_text(size = 12))

# ggsave() defaults to plot = last_plot(); this relies on the ggplot above
# having been printed by the chunk
ggsave(filename = "Study2-similarity.pdf",
       path = "/Volumes/GoogleDrive/My Drive/SANLab/Manuscripts/Survivor+Language/MarkdownFigures",
       width = 7, height = 5,
       units = c("in"))

ggsave(filename = "Study2-similarity.jpeg",
       path = "/Volumes/GoogleDrive/My Drive/SANLab/Manuscripts/Survivor+Language/MarkdownFigures",
       width = 7, height = 5,
       units = c("in"))

# keep a handle on the rendered plot for later composition
similarity.plot <- last_plot()

Re-reference

# re-run with rival as reference
# (releveling flips the sign of the condType and interaction terms; the
# simple slope for Rival becomes the recent.similarity main effect)
friend.rival$condType <- as.factor(friend.rival$condType)
friend.rival <- within(friend.rival, condType <- relevel(condType, ref = "Rival"))

model2.similarity <- lmer(percent.chosen ~ recent.similarity * condType + (1|dyad) + (1|PID),
                          data = friend.rival)
summary(model2.similarity)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: percent.chosen ~ recent.similarity * condType + (1 | dyad) +  
##     (1 | PID)
##    Data: friend.rival
## 
## REML criterion at convergence: 4179.2
## 
## Scaled residuals: 
##         Min          1Q      Median          3Q         Max 
## -1.99808368 -0.84534351 -0.00015929  0.83881292  1.99809357 
## 
## Random effects:
##  Groups   Name        Variance    Std.Dev.  
##  PID      (Intercept) 0.000000000 0.00000000
##  dyad     (Intercept) 0.001053759 0.03246166
##  Residual             0.090320851 0.30053428
## Number of obs: 9500, groups:  PID, 57; dyad, 21
## 
## Fixed effects:
##                                       Estimate    Std. Error            df
## (Intercept)                         0.54035562    0.01246423  124.69169243
## recent.similarity                  -0.12906962    0.03102888 7239.17613636
## condTypeFriend                     -0.11199200    0.01374135 9487.52640190
## recent.similarity:condTypeFriend    0.36064578    0.04123854 9489.71530381
##                                   t value   Pr(>|t|)    
## (Intercept)                      43.35249 < 2.22e-16 ***
## recent.similarity                -4.15966 3.2240e-05 ***
## condTypeFriend                   -8.15000 4.0999e-16 ***
## recent.similarity:condTypeFriend  8.74536 < 2.22e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) rcnt.s cndTyF
## rcnt.smlrty -0.745              
## condTypFrnd -0.556  0.597       
## rcnt.sml:TF  0.495 -0.664 -0.894
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# profile confidence intervals for the Rival-referenced model
confint(model2.similarity)
##                                           2.5 %          97.5 %
## .sig01                            0.00000000000  0.006408850023
## .sig02                            0.02207473611  0.046190115839
## .sigma                            0.29626052342  0.304816089719
## (Intercept)                       0.51612113617  0.564495083588
## recent.similarity                -0.18964337967 -0.068107202482
## condTypeFriend                   -0.13892832380 -0.085067185220
## recent.similarity:condTypeFriend  0.27984632082  0.441485724365

Compare interaction model

# run multilevel model
# (refit here after the releveling above, so Rival is now the reference)
model.similarity <- lmer(percent.chosen ~ recent.similarity * condType + (1|dyad) + (1|PID),
                         data = friend.rival)
# run main effect model (null): same fixed effects without the interaction
null.similarity <- lmer(percent.chosen ~ recent.similarity + condType + (1|dyad) + (1|PID),
                        data = friend.rival)
summary(null.similarity)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: percent.chosen ~ recent.similarity + condType + (1 | dyad) +  
##     (1 | PID)
##    Data: friend.rival
## 
## REML criterion at convergence: 4250.8
## 
## Scaled residuals: 
##         Min          1Q      Median          3Q         Max 
## -1.99551691 -0.86752174  0.01385268  0.88491228  1.83861264 
## 
## Random effects:
##  Groups   Name        Variance    Std.Dev.  
##  PID      (Intercept) 0.000000000 0.00000000
##  dyad     (Intercept) 0.001066839 0.03266251
##  Residual             0.091038000 0.30172504
## Number of obs: 9500, groups:  PID, 57; dyad, 21
## 
## Fixed effects:
##                        Estimate    Std. Error            df  t value Pr(>|t|)
## (Intercept)        4.864310e-01  1.088555e-02  7.082449e+01 44.68593  < 2e-16
## recent.similarity  5.099317e-02  2.330603e-02  4.366359e+03  2.18798 0.028724
## condTypeFriend    -4.602502e-03  6.191774e-03  9.477248e+03 -0.74333 0.457303
##                      
## (Intercept)       ***
## recent.similarity *  
## condTypeFriend       
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) rcnt.s
## rcnt.smlrty -0.640       
## condTypFrnd -0.292  0.012
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# profile confidence intervals for the main-effects (null) model
confint(null.similarity)
##                             2.5 %         97.5 %
## .sig01             0.000000000000 0.006365521067
## .sig02             0.022216474321 0.046472307022
## .sigma             0.297450037593 0.306039962551
## (Intercept)        0.465322043556 0.507432504946
## recent.similarity  0.005599482829 0.096833207449
## condTypeFriend    -0.016737839976 0.007533422842
# likelihood-ratio comparison of interaction vs. main-effects model
# (anova() refits both with ML before comparing)
anova(model.similarity, null.similarity)

Standardized Betas

# only include friend and rival judgments
# (fresh subset from overall_sim, so condType reverts to Friend as the
# alphabetical reference level)
friend.rival <- subset(overall_sim, condType != "Win")

# make condType a factor
friend.rival$condType <- as.factor(friend.rival$condType)
# NOTE(review): `test` is not used in this section — appears to be a leftover
# from the earlier sanity-check chunk; confirm before removing
test <- subset(overall_sim, condType == "Friend")

# standardize percent.chosen and recent.similarity
# (z-scored across both conditions; as.numeric drops scale()'s matrix attrs)
friend.rival$percent.chosenZ <- as.numeric(scale(friend.rival$percent.chosen))
friend.rival$recent.similarityZ <- as.numeric(scale(friend.rival$recent.similarity))

# run multilevel model
model.similarity <- lmer(percent.chosenZ ~ recent.similarityZ * condType + (1|dyad) + (1|PID),
                         data = friend.rival)

summary(model.similarity)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: percent.chosenZ ~ recent.similarityZ * condType + (1 | dyad) +  
##     (1 | PID)
##    Data: friend.rival
## 
## REML criterion at convergence: 26829.4
## 
## Scaled residuals: 
##         Min          1Q      Median          3Q         Max 
## -1.99808368 -0.84534352 -0.00015929  0.83881293  1.99809357 
## 
## Random effects:
##  Groups   Name        Variance     Std.Dev.    
##  PID      (Intercept) 1.730493e-17 4.159919e-09
##  dyad     (Intercept) 1.143642e-02 1.069412e-01
##  Residual             9.802501e-01 9.900758e-01
## Number of obs: 9500, groups:  PID, 57; dyad, 21
## 
## Fixed effects:
##                                       Estimate    Std. Error            df
## (Intercept)                      -7.009233e-03  2.740126e-02  2.682960e+01
## recent.similarityZ                1.143737e-01  1.534325e-02  6.768859e+03
## condTypeRival                     1.517304e-02  2.031759e-02  9.476264e+03
## recent.similarityZ:condTypeRival -1.781203e-01  2.036741e-02  9.489715e+03
##                                   t value   Pr(>|t|)    
## (Intercept)                      -0.25580    0.80006    
## recent.similarityZ                7.45433 1.0158e-13 ***
## condTypeRival                     0.74679    0.45521    
## recent.similarityZ:condTypeRival -8.74536 < 2.22e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) rcnt.Z cndTyR
## rcnt.smlrtZ  0.007              
## condTypeRvl -0.370 -0.009       
## rcnt.smZ:TR -0.004 -0.665  0.000
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# get confidence intervals (profile CIs on the standardized model)
confint(model.similarity)
##                                           2.5 %         97.5 %
## .sig01                            0.00000000000  0.02112586257
## .sig02                            0.07272269368  0.15216805401
## .sigma                            0.97599641247  1.00418174718
## (Intercept)                      -0.05945144221  0.04524420659
## recent.similarityZ                0.08443368711  0.14452574120
## condTypeRival                    -0.02464742162  0.05499171676
## recent.similarityZ:condTypeRival -0.21804650713 -0.13821401106
# get simple slopes (standardized similarity slope per condType level)
simple_slopes(model.similarity)
# compare to null model (main effects only, no interaction)
null.similarity <- lmer(percent.chosenZ ~ recent.similarityZ + condType + (1|dyad) + (1|PID),
                        data = friend.rival)
summary(null.similarity)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: percent.chosenZ ~ recent.similarityZ + condType + (1 | dyad) +  
##     (1 | PID)
##    Data: friend.rival
## 
## REML criterion at convergence: 26899.6
## 
## Scaled residuals: 
##         Min          1Q      Median          3Q         Max 
## -1.99551688 -0.86752173  0.01385268  0.88491229  1.83861263 
## 
## Random effects:
##  Groups   Name        Variance   Std.Dev. 
##  PID      (Intercept) 0.00000000 0.0000000
##  dyad     (Intercept) 0.01157837 0.1076029
##  Residual             0.98803329 0.9939986
## Number of obs: 9500, groups:  PID, 57; dyad, 21
## 
## Fixed effects:
##                         Estimate    Std. Error            df  t value Pr(>|t|)
## (Intercept)        -8.016346e-03  2.755382e-02  2.678391e+01 -0.29093 0.773343
## recent.similarityZ  2.518515e-02  1.151067e-02  4.366358e+03  2.18798 0.028724
## condTypeRival       1.516242e-02  2.039809e-02  9.477248e+03  0.74333 0.457303
##                     
## (Intercept)         
## recent.similarityZ *
## condTypeRival       
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) rcnt.Z
## rcnt.smlrtZ  0.006       
## condTypeRvl -0.370 -0.012
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# profile confidence intervals for the standardized null model
confint(null.similarity)
##                              2.5 %        97.5 %
## .sig01              0.000000000000 0.02095258372
## .sig02              0.073188549897 0.15309803993
## .sigma              0.979914709055 1.00821412510
## (Intercept)        -0.062733509591 0.04667702249
## recent.similarityZ  0.002703985606 0.04784669498
## condTypeRival      -0.024818018366 0.05514090237
# likelihood-ratio comparison of standardized interaction vs. null model
anova(model.similarity, null.similarity)

Sentiment / Clout

Note: Sentiment and clout are within-person (in this case, within Target) mean-centered (not grand mean centered). This allows me to capture variations relative to each Target’s average, rather than relative to the grand mean.

Format data

# get mean % time chosen per clip, averaged across participants
all_choices <- df.ling %>%
  select(Target, ChoiceOption, ClipNumber, PID, BlockType, 
         percent.chosen, recent.sentiment, recent.clout, mean.wordcount)

# add "Speaker" and "Recipient" columns
all_choices$Speaker <- all_choices$Target
all_choices$Recipient <- all_choices$ChoiceOption

# make sure PID is a factor
all_choices$PID <- as.factor(all_choices$PID)

# mean center both recent.sentiment and recent.clout scores
# (centering within target)
# NOTE(review): presumably bmlm::isolate(), which appends within-centered
# columns — confirm the source package is loaded earlier in the file
all_choices <- isolate(all_choices, by = "Target",
                value = c("recent.sentiment","recent.clout"),
                which = "within")
# rename
# NOTE(review): positional rename assumes the two new centered columns land
# at positions 12 and 13 — fragile if the select() above changes; prefer
# renaming by column name
names(all_choices)[12] <- "MC_recent.sentiment"
names(all_choices)[13] <- "MC_recent.clout"

Not every contestant speaks to every other contestant in each clip, so we have fewer data points for sentiment and clout than for the similarity plot.

Look at simple slopes for when BlockType is friend and rival. This indicates the relationship between sentiment and % of time chosen when in a friend block or a rival block. I am testing if that value is significantly different from zero. Sentiment/clout is the moderator!

Sentiment

# only include friend and rival judgments
friend.rival <- subset(all_choices, BlockType != "Win")

# set up linear mixed effects model
# interaction tests whether the (within-Target-centered) sentiment slope
# differs between Friend and Rival blocks; random intercepts for
# participant, speaker, and recipient
model.sentiment <- lmer(percent.chosen ~ BlockType * MC_recent.sentiment + 
                          (1|PID) + (1|Speaker) + (1|Recipient),
                    data = friend.rival)
summary(model.sentiment)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: percent.chosen ~ BlockType * MC_recent.sentiment + (1 | PID) +  
##     (1 | Speaker) + (1 | Recipient)
##    Data: friend.rival
## 
## REML criterion at convergence: 1298
## 
## Scaled residuals: 
##        Min         1Q     Median         3Q        Max 
## -1.9198563 -0.8316989  0.1035755  0.7717626  1.7715325 
## 
## Random effects:
##  Groups    Name        Variance     Std.Dev.   
##  PID       (Intercept) 0.000000e+00 0.000000000
##  Speaker   (Intercept) 3.922761e-05 0.006263195
##  Recipient (Intercept) 2.264011e-03 0.047581620
##  Residual              1.027269e-01 0.320510439
## Number of obs: 2250, groups:  PID, 57; Speaker, 7; Recipient, 7
## 
## Fixed effects:
##                                         Estimate    Std. Error            df
## (Intercept)                           0.53820027    0.02063334    7.93088039
## BlockTypeRival                       -0.05233250    0.01355113 2242.98515297
## MC_recent.sentiment                  -0.07167675    0.03971104 2232.65507258
## BlockTypeRival:MC_recent.sentiment    0.06584197    0.05617951 2238.70324664
##                                     t value   Pr(>|t|)    
## (Intercept)                        26.08401 5.6726e-09 ***
## BlockTypeRival                     -3.86186 0.00011572 ***
## MC_recent.sentiment                -1.80496 0.07121602 .  
## BlockTypeRival:MC_recent.sentiment  1.17199 0.24132476    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) BlckTR MC_rc.
## BlockTypRvl -0.332              
## MC_rcnt.snt  0.005 -0.004       
## BlckTR:MC_. -0.002  0.007 -0.687
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# get confidence intervals (profile CIs; .sig01-.sig03 are random-effect SDs)
confint(model.sentiment)
##                                             2.5 %          97.5 %
## .sig01                              0.00000000000  0.020840526512
## .sig02                              0.00000000000  0.030997979854
## .sig03                              0.02448979516  0.086893804495
## .sigma                              0.31114445726  0.329914459983
## (Intercept)                         0.49562868558  0.580321037437
## BlockTypeRival                     -0.07894670910 -0.025836681632
## MC_recent.sentiment                -0.14922760904  0.006552146365
## BlockTypeRival:MC_recent.sentiment -0.04399188994  0.176403950208
# get simple slopes: sentiment slope within each BlockType level
simple_slopes(model.sentiment)
# plot simple slopes
graph_model(model.sentiment, y=percent.chosen, x=BlockType, lines=MC_recent.sentiment, colors = fr.colors)

Plot!

# use ggpredict to get marginal means / predicted values
sentiment.df <- ggpredict(model.sentiment, terms = c("MC_recent.sentiment","BlockType"), ci.lvl = 0.95)
sentiment.df <- as.data.frame(sentiment.df)

# get point information for plot (mean sentiment and mean time chosen per speaker/recipient pair, per clip)
# each point represents average sentiment for each speaker/recipient pair for each clip
sent <- friend.rival %>%
  group_by(Target, ChoiceOption, ClipNumber, BlockType) %>%
  summarize(mean.MC_sentiment = mean(MC_recent.sentiment),
            mean.TimeChosen = mean(percent.chosen))

# plot!
ggplot(data = as.data.frame(sentiment.df),
       aes(y = predicted, x = x, color = group, fill = group)) +
  geom_point(data = sent, 
             aes(x = mean.MC_sentiment, y = mean.TimeChosen, color = BlockType, fill = BlockType),
             alpha = 0.5, shape = 16) +
  geom_line(size = 1.5) +
  ylim(0, 1) +
  # NOTE(review): linetype = .5 is not a standard linetype value — confirm
  geom_ribbon(alpha = my.alpha,
              aes(ymin = conf.low, ymax = conf.high),
              linetype = .5) +
  scale_color_manual(values = fr.colors, name = "Group") +
  scale_fill_manual(values = fr.colors, name = "Group") +
  xlab("mean-centered sentiment") +
  ylab("% time chosen") + theme_classic() +
  theme(panel.background = element_blank(),
        axis.title.x = element_text(vjust = -0.4, size = 16),
        axis.title.y = element_text(vjust = 1.5, size = 16), 
        axis.text = element_text(size = 12),
        legend.text = element_text(size = 10),
        legend.title = element_text(size = 12))

# ggsave() defaults to plot = last_plot(); relies on the chunk having
# printed the ggplot above
ggsave(filename = "Study2-sentiment.pdf",
       path = "/Volumes/GoogleDrive/My Drive/SANLab/Manuscripts/Survivor+Language/MarkdownFigures",
       width = 7, height = 5,
       units = c("in"))

ggsave(filename = "Study2-sentiment.jpeg",
       path = "/Volumes/GoogleDrive/My Drive/SANLab/Manuscripts/Survivor+Language/MarkdownFigures",
       width = 7, height = 5,
       units = c("in"))

# keep a handle on the rendered plot for later composition
sentiment.plot <- last_plot()

Re-reference

# re-run with rival as reference
# (releveling flips signs of BlockType and interaction terms; the Rival
# simple slope becomes the MC_recent.sentiment main effect)
friend.rival$BlockType <- as.factor(friend.rival$BlockType)
friend.rival <- within(friend.rival, BlockType <- relevel(BlockType, ref = "Rival"))

model2.sentiment <- lmer(percent.chosen ~ BlockType * MC_recent.sentiment + 
                          (1|PID) + (1|Speaker) + (1|Recipient),
                    data = friend.rival)
summary(model2.sentiment)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: percent.chosen ~ BlockType * MC_recent.sentiment + (1 | PID) +  
##     (1 | Speaker) + (1 | Recipient)
##    Data: friend.rival
## 
## REML criterion at convergence: 1298
## 
## Scaled residuals: 
##        Min         1Q     Median         3Q        Max 
## -1.9198563 -0.8316989  0.1035755  0.7717626  1.7715325 
## 
## Random effects:
##  Groups    Name        Variance     Std.Dev.   
##  PID       (Intercept) 0.000000e+00 0.000000000
##  Speaker   (Intercept) 3.922761e-05 0.006263195
##  Recipient (Intercept) 2.264011e-03 0.047581621
##  Residual              1.027269e-01 0.320510439
## Number of obs: 2250, groups:  PID, 57; Speaker, 7; Recipient, 7
## 
## Fixed effects:
##                                          Estimate    Std. Error            df
## (Intercept)                          4.858678e-01  2.058185e-02  7.855920e+00
## BlockTypeFriend                      5.233250e-02  1.355113e-02  2.242985e+03
## MC_recent.sentiment                 -5.834775e-03  4.084799e-02  2.233605e+03
## BlockTypeFriend:MC_recent.sentiment -6.584197e-02  5.617951e-02  2.238703e+03
##                                      t value   Pr(>|t|)    
## (Intercept)                         23.60662 1.4091e-08 ***
## BlockTypeFriend                      3.86186 0.00011572 ***
## MC_recent.sentiment                 -0.14284 0.88642850    
## BlockTypeFriend:MC_recent.sentiment -1.17199 0.24132476    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) BlckTF MC_rc.
## BlckTypFrnd -0.325              
## MC_rcnt.snt  0.006 -0.005       
## BlckTF:MC_. -0.002  0.007 -0.708
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# profile confidence intervals for the Rival-referenced sentiment model
confint(model2.sentiment)
##                                              2.5 %        97.5 %
## .sig01                               0.00000000000 0.02084052588
## .sig02                               0.00000000000 0.03099798077
## .sig03                               0.02448979581 0.08689380666
## .sigma                               0.31114444033 0.32991445998
## (Intercept)                          0.44348493695 0.52801302595
## BlockTypeFriend                      0.02583667405 0.07894670897
## MC_recent.sentiment                 -0.08530770697 0.07505022815
## BlockTypeFriend:MC_recent.sentiment -0.17640458240 0.04399190852

Compare interaction model

# run multilevel model
# (refit here after the releveling above, so Rival is now the reference)
model.sentiment <- lmer(percent.chosen ~ BlockType * MC_recent.sentiment + 
                          (1|PID) + (1|Speaker) + (1|Recipient),
                    data = friend.rival)
# run main effect model (null): same fixed effects without the interaction
null.sentiment <- lmer(percent.chosen ~ BlockType + MC_recent.sentiment + (1|Speaker) + (1|Recipient) + (1|PID),
                        data = friend.rival)
summary(null.sentiment)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: percent.chosen ~ BlockType + MC_recent.sentiment + (1 | Speaker) +  
##     (1 | Recipient) + (1 | PID)
##    Data: friend.rival
## 
## REML criterion at convergence: 1295.4
## 
## Scaled residuals: 
##         Min          1Q      Median          3Q         Max 
## -1.90560764 -0.83752770  0.08788606  0.77899825  1.82823264 
## 
## Random effects:
##  Groups    Name        Variance     Std.Dev.   
##  PID       (Intercept) 0.000000e+00 0.000000000
##  Recipient (Intercept) 2.291582e-03 0.047870466
##  Speaker   (Intercept) 4.894861e-05 0.006996329
##  Residual              1.027346e-01 0.320522352
## Number of obs: 2250, groups:  PID, 57; Recipient, 7; Speaker, 7
## 
## Fixed effects:
##                          Estimate    Std. Error            df  t value
## (Intercept)            0.48578646    0.02071614    7.89129149 23.44966
## BlockTypeFriend        0.05243437    0.01355184 2243.96764135  3.86917
## MC_recent.sentiment   -0.03972900    0.02886635 2179.28703559 -1.37631
##                       Pr(>|t|)    
## (Intercept)         1.3976e-08 ***
## BlockTypeFriend     0.00011232 ***
## MC_recent.sentiment 0.16886768    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) BlckTF
## BlckTypFrnd -0.323       
## MC_rcnt.snt  0.005 -0.001
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# profile confidence intervals for the main-effects (null) sentiment model
confint(null.sentiment)
##                              2.5 %        97.5 %
## .sig01               0.00000000000 0.02058639800
## .sig02               0.02471964948 0.08740727229
## .sig03               0.00000000000 0.03157694303
## .sigma               0.31123027150 0.32999411650
## (Intercept)          0.44351138626 0.52807678184
## BlockTypeFriend      0.02593299632 0.07905041653
## MC_recent.sentiment -0.09573459145 0.01731137789
# likelihood-ratio comparison of interaction vs. main-effects model
# (anova() refits both with ML before comparing)
anova(model.sentiment, null.sentiment)

Standardized Betas

# only include friend and rival judgments
friend.rival <- subset(all_choices, BlockType != "Win")

# standardize percent chosen & sentiment
# (z-scored across both conditions; as.numeric drops scale()'s matrix attrs)
friend.rival$percent.chosenZ <- as.numeric(scale(friend.rival$percent.chosen))
friend.rival$MC_recent.sentimentZ <- as.numeric(scale(friend.rival$MC_recent.sentiment))

# set up linear mixed effects model (standardized-betas version of the
# sentiment interaction model above)
model.sentiment <- lmer(percent.chosenZ ~ BlockType * MC_recent.sentimentZ+ 
                          (1|PID) + (1|Speaker) + (1|Recipient),
                    data = friend.rival)
summary(model.sentiment)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: percent.chosenZ ~ BlockType * MC_recent.sentimentZ + (1 | PID) +  
##     (1 | Speaker) + (1 | Recipient)
##    Data: friend.rival
## 
## REML criterion at convergence: 6352.8
## 
## Scaled residuals: 
##        Min         1Q     Median         3Q        Max 
## -1.9198563 -0.8316989  0.1035755  0.7717626  1.7715325 
## 
## Random effects:
##  Groups    Name        Variance    Std.Dev. 
##  PID       (Intercept) 0.000000000 0.0000000
##  Speaker   (Intercept) 0.000371464 0.0192734
##  Recipient (Intercept) 0.021438952 0.1464205
##  Residual              0.972768377 0.9862902
## Number of obs: 2250, groups:  PID, 57; Speaker, 7; Recipient, 7
## 
## Fixed effects:
##                                          Estimate    Std. Error            df
## (Intercept)                            0.06568151    0.06349348    7.93073600
## BlockTypeRival                        -0.16121672    0.04169943 2242.97947087
## MC_recent.sentimentZ                  -0.05327851    0.02951787 2232.65507278
## BlockTypeRival:MC_recent.sentimentZ    0.04894142    0.04175916 2238.70324913
##                                      t value   Pr(>|t|)    
## (Intercept)                          1.03446 0.33143122    
## BlockTypeRival                      -3.86616 0.00011371 ***
## MC_recent.sentimentZ                -1.80496 0.07121602 .  
## BlockTypeRival:MC_recent.sentimentZ  1.17199 0.24132476    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) BlckTR MC_r.Z
## BlockTypRvl -0.332              
## MC_rcnt.snZ  0.003 -0.002       
## BlcTR:MC_.Z -0.001  0.003 -0.687
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# get confidence intervals
# profile CIs; the .sig0x rows are random-intercept SDs, .sigma is the residual SD
confint(model.sentiment)
##                                              2.5 %          97.5 %
## .sig01                               0.00000000000  0.064131363937
## .sig02                               0.00000000000  0.095397674820
## .sig03                               0.07537360849  0.267410890880
## .sigma                               0.95746851222  1.015228710671
## (Intercept)                         -0.06532052890  0.195299223235
## BlockTypeRival                      -0.24311489067 -0.079684778282
## MC_recent.sentimentZ                -0.11092334072  0.004870274456
## BlockTypeRival:MC_recent.sentimentZ -0.03270004288  0.131123962751
# get simple slopes
# slope of standardized sentiment within each BlockType level
# (simple_slopes() presumably from the reghelper package — TODO confirm)
simple_slopes(model.sentiment)
# compare to null model
# main-effects-only model (no interaction term) used as the comparison baseline,
# with the same random-intercept structure as the interaction model
null.sentiment <- lmer(percent.chosenZ ~ BlockType + MC_recent.sentimentZ + (1|Speaker) + (1|Recipient) + (1|PID),
                        data = friend.rival)
summary(null.sentiment)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: percent.chosenZ ~ BlockType + MC_recent.sentimentZ + (1 | Speaker) +  
##     (1 | Recipient) + (1 | PID)
##    Data: friend.rival
## 
## REML criterion at convergence: 6349.7
## 
## Scaled residuals: 
##         Min          1Q      Median          3Q         Max 
## -1.90560764 -0.83752770  0.08788606  0.77899825  1.82823264 
## 
## Random effects:
##  Groups    Name        Variance     Std.Dev.  
##  PID       (Intercept) 0.0000000000 0.00000000
##  Recipient (Intercept) 0.0217000324 0.14730931
##  Speaker   (Intercept) 0.0004635169 0.02152944
##  Residual              0.9728406936 0.98632687
## Number of obs: 2250, groups:  PID, 57; Recipient, 7; Speaker, 7
## 
## Fixed effects:
##                           Estimate    Std. Error            df  t value
## (Intercept)             0.06565908    0.06390345    7.96450143  1.02747
## BlockTypeRival         -0.16135358    0.04170239 2243.96764114 -3.86917
## MC_recent.sentimentZ   -0.02953122    0.02145684 2179.28703261 -1.37631
##                        Pr(>|t|)    
## (Intercept)          0.33438816    
## BlockTypeRival       0.00011232 ***
## MC_recent.sentimentZ 0.16886768    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) BlckTR
## BlockTypRvl -0.330       
## MC_rcnt.snZ  0.004  0.001
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# profile confidence intervals for the main-effects (null) model
confint(null.sentiment)
##                               2.5 %         97.5 %
## .sig01                0.00000000000  0.06334136671
## .sig02                0.07606850101  0.26897388681
## .sig03                0.00000000000  0.09717008953
## .sigma                0.95773282988  1.01547384045
## (Intercept)          -0.06515825761  0.19551589625
## BlockTypeRival       -0.24325776285 -0.07980227052
## MC_recent.sentimentZ -0.07116113507  0.01286783572
# likelihood-ratio test: interaction model vs. main-effects model (standardized fit)
anova(model.sentiment, null.sentiment)

Clout

# only include friend and rival judgments
friend.rival <- subset(all_choices, BlockType != "Win")

# set up linear mixed effects model
# % time chosen predicted by relationship type x mean-centered clout, with
# random intercepts for participant, speaker, and recipient
model.clout <- lmer(percent.chosen ~ BlockType * MC_recent.clout + 
                    (1|PID) + (1|Speaker) + (1|Recipient),
                    data = friend.rival)
summary(model.clout)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: percent.chosen ~ BlockType * MC_recent.clout + (1 | PID) + (1 |  
##     Speaker) + (1 | Recipient)
##    Data: friend.rival
## 
## REML criterion at convergence: 1235
## 
## Scaled residuals: 
##         Min          1Q      Median          3Q         Max 
## -2.08580677 -0.82338968  0.00552571  0.85607145  1.98845447 
## 
## Random effects:
##  Groups    Name        Variance     Std.Dev.  
##  PID       (Intercept) 0.0000000000 0.00000000
##  Speaker   (Intercept) 0.0001488774 0.01220153
##  Recipient (Intercept) 0.0018145268 0.04259726
##  Residual              0.0990494274 0.31472119
## Number of obs: 2250, groups:  PID, 57; Speaker, 7; Recipient, 7
## 
## Fixed effects:
##                                     Estimate    Std. Error            df
## (Intercept)                     5.371618e-01  1.937633e-02  8.990239e+00
## BlockTypeRival                 -5.251909e-02  1.331512e-02  2.243670e+03
## MC_recent.clout                 2.816423e-03  3.975850e-04  1.876927e+03
## BlockTypeRival:MC_recent.clout -4.958703e-03  5.394481e-04  2.240196e+03
##                                 t value   Pr(>|t|)    
## (Intercept)                    27.72257 5.1081e-10 ***
## BlockTypeRival                 -3.94432 8.2483e-05 ***
## MC_recent.clout                 7.08383 1.9753e-12 ***
## BlockTypeRival:MC_recent.clout -9.19218 < 2.22e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) BlckTR MC_rc.
## BlockTypRvl -0.347              
## MC_rcnt.clt -0.019  0.018       
## BlckTR:MC_.  0.003  0.005 -0.656
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# get confidence intervals
# profile CIs; .sig01-.sig03 are the random-intercept SDs, .sigma the residual SD
confint(model.clout)
##                                          2.5 %          97.5 %
## .sig01                          0.000000000000  0.019121372822
## .sig02                          0.000000000000  0.036025581158
## .sig03                          0.021504156575  0.078439668808
## .sigma                          0.305529792328  0.323955500802
## (Intercept)                     0.497936847214  0.575994371658
## BlockTypeRival                 -0.078662946562 -0.026485927918
## MC_recent.clout                 0.002043074669  0.003598023646
## BlockTypeRival:MC_recent.clout -0.006017203840 -0.003903238941
# get simple slopes
simple_slopes(model.clout)
# plot simple slopes
# (graph_model()/simple_slopes() presumably from reghelper — TODO confirm;
#  they use non-standard evaluation, hence the bare column names)
graph_model(model.clout, y=percent.chosen, x=BlockType, lines=MC_recent.clout, colors = fr.colors)

Plot!

# use ggpredict to get marginal means / predicted values
# (one fitted line + CI per BlockType across the observed clout range)
clout.df <- ggpredict(model.clout, terms = c("MC_recent.clout","BlockType"), ci.lvl = 0.95)
clout.df <- as.data.frame(clout.df)

# get point information for plot (mean clout and mean time chosen per speaker/recipient pair, per clip)
# each point represents average clout for each speaker/recipient pair for each clip
# .groups = "drop" returns an ungrouped data frame and silences dplyr's
# "summarise() has grouped output" message; downstream code only reads columns
clout <- friend.rival %>%
  group_by(Target, ChoiceOption, ClipNumber, BlockType) %>%
  summarize(mean.MC_clout = mean(MC_recent.clout),
            mean.TimeChosen = mean(percent.chosen),
            .groups = "drop")

# plot!
# model-predicted lines/ribbons (clout.df) overlaid on per-dyad-per-clip means (clout)
ggplot(data = as.data.frame(clout.df),
       aes(y = predicted, x = x, color = group, fill = group)) +
  geom_point(data = clout, 
             aes(x = mean.MC_clout, y = mean.TimeChosen, color = BlockType, fill = BlockType),
             alpha = 0.5, shape = 16) +
  geom_line(size = 1.5) +
  ylim(0, 1) +
  geom_ribbon(alpha = my.alpha,
              aes(ymin = conf.low, ymax = conf.high),
              # NOTE(review): .5 is not a standard linetype code (0-6);
              # possibly meant linetype = 0 (no ribbon border) — confirm intent
              linetype = .5) +
  scale_color_manual(values = fr.colors, name = "Group") +
  scale_fill_manual(values = fr.colors, name = "Group") +
  xlab("mean-centered clout") +
  ylab("% time chosen") + theme_classic() +
  theme(panel.background = element_blank(),
        axis.title.x = element_text(vjust = -0.4, size = 16),
        axis.title.y = element_text(vjust = 1.5, size = 16), 
        axis.text = element_text(size = 12),
        legend.text = element_text(size = 10),
        legend.title = element_text(size = 12))

# Save the clout figure (ggsave defaults to last_plot()) in both vector and
# raster formats; the output device is inferred from each file extension.
# Path is machine-specific (Google Drive mount).
invisible(lapply(
  c("Study2-clout.pdf", "Study2-clout.jpeg"),
  function(fname) {
    ggsave(filename = fname,
           path = "/Volumes/GoogleDrive/My Drive/SANLab/Manuscripts/Survivor+Language/MarkdownFigures",
           width = 7, height = 5,
           units = c("in"))
  }
))

# keep the ggplot object for the multi-panel patchwork figure below
clout.plot <- last_plot()

Re-reference

# re-run with rival as reference
# releveling flips the dummy coding so "Friend" effects are estimated relative
# to the Rival baseline; overall fit is identical to model.clout
friend.rival$BlockType <- as.factor(friend.rival$BlockType)
friend.rival <- within(friend.rival, BlockType <- relevel(BlockType, ref = "Rival"))

model2.clout <- lmer(percent.chosen ~ BlockType * MC_recent.clout + 
                    (1|PID) + (1|Speaker) + (1|Recipient),
                    data = friend.rival)
summary(model2.clout)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: percent.chosen ~ BlockType * MC_recent.clout + (1 | PID) + (1 |  
##     Speaker) + (1 | Recipient)
##    Data: friend.rival
## 
## REML criterion at convergence: 1235
## 
## Scaled residuals: 
##         Min          1Q      Median          3Q         Max 
## -2.08580677 -0.82338968  0.00552571  0.85607145  1.98845447 
## 
## Random effects:
##  Groups    Name        Variance     Std.Dev.  
##  PID       (Intercept) 0.0000000000 0.00000000
##  Speaker   (Intercept) 0.0001488774 0.01220153
##  Recipient (Intercept) 0.0018145269 0.04259726
##  Residual              0.0990494274 0.31472119
## Number of obs: 2250, groups:  PID, 57; Speaker, 7; Recipient, 7
## 
## Fixed effects:
##                                      Estimate    Std. Error            df
## (Intercept)                      4.846427e-01  1.932836e-02  8.907346e+00
## BlockTypeFriend                  5.251909e-02  1.331512e-02  2.243670e+03
## MC_recent.clout                 -2.142279e-03  4.096036e-04  1.881688e+03
## BlockTypeFriend:MC_recent.clout  4.958703e-03  5.394481e-04  2.240196e+03
##                                  t value   Pr(>|t|)    
## (Intercept)                     25.07417 1.4344e-09 ***
## BlockTypeFriend                  3.94432 8.2483e-05 ***
## MC_recent.clout                 -5.23013 1.8826e-07 ***
## BlockTypeFriend:MC_recent.clout  9.19218 < 2.22e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) BlckTF MC_rc.
## BlckTypFrnd -0.341              
## MC_rcnt.clt  0.002 -0.024       
## BlckTF:MC_. -0.007  0.005 -0.680
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# profile confidence intervals for the rival-referenced model
confint(model2.clout)
##                                           2.5 %          97.5 %
## .sig01                           0.000000000000  0.019121665348
## .sig02                           0.000000000000  0.036025580305
## .sig03                           0.021504156108  0.078439667424
## .sigma                           0.305529792064  0.323955500526
## (Intercept)                      0.445285780642  0.523779214859
## BlockTypeFriend                  0.026485927457  0.078662946056
## MC_recent.clout                 -0.002940724859 -0.001338620990
## BlockTypeFriend:MC_recent.clout  0.003903238960  0.006017203857

Compare interaction model

# Likelihood-ratio comparison of the interaction model against a
# main-effects-only baseline, using the same random-intercept structure.

# interaction model
model.clout <- lmer(
  percent.chosen ~ BlockType * MC_recent.clout +
    (1 | PID) + (1 | Speaker) + (1 | Recipient),
  data = friend.rival
)

# main-effects-only model (null)
null.clout <- lmer(
  percent.chosen ~ BlockType + MC_recent.clout +
    (1 | Speaker) + (1 | Recipient) + (1 | PID),
  data = friend.rival
)

# chi-square test of the interaction term
anova(model.clout, null.clout)

Standardized Betas

# only include friend and rival judgments
friend.rival <- subset(all_choices, BlockType != "Win")

# standardize percent.chosen and MC_recent.clout
# scale() returns a 1-column matrix; as.numeric() flattens to a plain vector
friend.rival$percent.chosenZ <- as.numeric(scale(friend.rival$percent.chosen))
friend.rival$MC_recent.cloutZ <- as.numeric(scale(friend.rival$MC_recent.clout))

# set up linear mixed effects model
# z-scored refit so the fixed effects can be read as standardized betas
model.clout <- lmer(percent.chosenZ ~ BlockType * MC_recent.cloutZ + 
                    (1|PID) + (1|Speaker) + (1|Recipient),
                    data = friend.rival)
summary(model.clout)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: percent.chosenZ ~ BlockType * MC_recent.cloutZ + (1 | PID) +  
##     (1 | Speaker) + (1 | Recipient)
##    Data: friend.rival
## 
## REML criterion at convergence: 6271.3
## 
## Scaled residuals: 
##         Min          1Q      Median          3Q         Max 
## -2.08580677 -0.82338968  0.00552571  0.85607145  1.98845447 
## 
## Random effects:
##  Groups    Name        Variance    Std.Dev.  
##  PID       (Intercept) 0.000000000 0.00000000
##  Speaker   (Intercept) 0.001409789 0.03754715
##  Recipient (Intercept) 0.017182584 0.13108236
##  Residual              0.937944316 0.96847525
## Number of obs: 2250, groups:  PID, 57; Speaker, 7; Recipient, 7
## 
## Fixed effects:
##                                      Estimate    Std. Error            df
## (Intercept)                        0.06153399    0.05962789    8.99123787
## BlockTypeRival                    -0.16027703    0.04097347 2243.66646496
## MC_recent.cloutZ                   0.21376418    0.03017637 1876.92655976
## BlockTypeRival:MC_recent.cloutZ   -0.37636136    0.04094366 2240.19616569
##                                  t value   Pr(>|t|)    
## (Intercept)                      1.03197    0.32905    
## BlockTypeRival                  -3.91173 9.4363e-05 ***
## MC_recent.cloutZ                 7.08383 1.9753e-12 ***
## BlockTypeRival:MC_recent.cloutZ -9.19218 < 2.22e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) BlckTR MC_r.Z
## BlockTypRvl -0.347              
## MC_rcnt.clZ -0.021  0.020       
## BlcTR:MC_.Z  0.004  0.001 -0.656
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# get confidence intervals
# profile CIs; .sig terms are random-intercept SDs, .sigma the residual SD
confint(model.clout)
##                                          2.5 %         97.5 %
## .sig01                           0.00000000000  0.05883055722
## .sig02                           0.00000000000  0.11085965544
## .sig03                           0.06617362819  0.24137833482
## .sigma                           0.94019103923  0.99689151964
## (Intercept)                     -0.05917051587  0.18103844684
## BlockTypeRival                  -0.24072686148 -0.08016698170
## MC_recent.cloutZ                 0.15507056220  0.27308697745
## BlockTypeRival:MC_recent.cloutZ -0.45670072456 -0.29625256252
# get simple slopes
simple_slopes(model.clout)
# main-effects-only (null) model for the standardized fit
null.clout <- lmer(percent.chosenZ ~ BlockType + MC_recent.cloutZ + 
                     (1|Speaker) + (1|Recipient) + (1|PID),
                     data = friend.rival)
summary(null.clout)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: percent.chosenZ ~ BlockType + MC_recent.cloutZ + (1 | Speaker) +  
##     (1 | Recipient) + (1 | PID)
##    Data: friend.rival
## 
## REML criterion at convergence: 6349.6
## 
## Scaled residuals: 
##        Min         1Q     Median         3Q        Max 
## -1.9089159 -0.8633429  0.0931657  0.7961518  1.7887792 
## 
## Random effects:
##  Groups    Name        Variance     Std.Dev.  
##  PID       (Intercept) 0.0000000000 0.00000000
##  Recipient (Intercept) 0.0193294123 0.13903026
##  Speaker   (Intercept) 0.0007132865 0.02670742
##  Residual              0.9729864215 0.98640074
## Number of obs: 2250, groups:  PID, 57; Recipient, 7; Speaker, 7
## 
## Fixed effects:
##                       Estimate    Std. Error            df  t value   Pr(>|t|)
## (Intercept)         0.06424301    0.06153119    8.42555053  1.04407 0.32548010
## BlockTypeRival     -0.15981864    0.04172369 2244.41002317 -3.83041 0.00013144
## MC_recent.cloutZ    0.03112719    0.02319493 1419.86895833  1.34198 0.17981630
##                     
## (Intercept)         
## BlockTypeRival   ***
## MC_recent.cloutZ    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) BlckTR
## BlockTypRvl -0.343       
## MC_rcnt.clZ -0.022  0.028
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# profile confidence intervals for the main-effects (null) model
confint(null.clout)
##                           2.5 %         97.5 %
## .sig01            0.00000000000  0.06311208793
## .sig02            0.07096515818  0.25439391855
## .sig03            0.00000000000  0.10082813933
## .sigma            0.95779521451  1.01557524608
## (Intercept)      -0.06216465688  0.18925241659
## BlockTypeRival   -0.24177281347 -0.07821324521
## MC_recent.cloutZ -0.01417816380  0.07689906253
# likelihood-ratio test: interaction vs. main-effects model (standardized fit)
anova(model.clout, null.clout)

Save multi panel plot for linguistic features

# patchwork plot and save
# combine the three single-feature figures (built earlier) into one row
patchwork <- (similarity.plot + sentiment.plot + clout.plot)
# NOTE(review): plot_annotation() is normally added with `+` (it annotates the
# whole composition), while `&` applies to every subplot — confirm the panel
# tags (A, B, C) render as intended
patchwork + plot_layout(guides = "collect") & plot_annotation(tag_levels = "A") & theme(plot.tag = element_text(size = 14, face = "bold")) 

ggsave(filename = "Study2-all-features.pdf",
       path = "/Volumes/GoogleDrive/My Drive/SANLab/Manuscripts/Survivor+Language/MarkdownFigures",
       width = 15, height = 5,
       units = c("in"))

ggsave(filename = "Study2-all-features.jpeg",
       path = "/Volumes/GoogleDrive/My Drive/SANLab/Manuscripts/Survivor+Language/MarkdownFigures",
       width = 15, height = 5,
       units = c("in"))

Supplemental: Bayesian approach to examine singularity

We took a Bayesian approach to establish that singularity would not undermine the validity of the models. Singularity typically occurs when there is limited variability within the random effects term(s) and/or the random effects structure is overly complex relative to the variability offered by the data. Given the nature of our data, we opted to retain our random effects in all frequentist models after examining the same models with a Bayesian approach (using uninformative/default priors, which should — and do — yield the same beta estimates as the frequentist approach).

Similarity

library(bayesplot)
library(brms)
library(tidybayes)
# only include friend and rival judgments
friend.rival <- subset(overall_sim, condType != "Win")
# `sim` (per-dyad plotting means) is built earlier in the notebook — not visible here
sim.friendRival <- subset(sim, condType != "Win")

# Bayesian analogue of the frequentist similarity model, using brms defaults
# (no explicit priors); high adapt_delta / max_treedepth to avoid divergences,
# fixed seed for reproducibility
model.similarity.brms <- brm(data = friend.rival,
      family = gaussian,
      percent.chosen ~ recent.similarity * condType + (1|dyad) + (1|PID), 
      iter = 4000, warmup = 1500, chains = 3, cores = 3,
      control = list(adapt_delta = .999, max_treedepth = 15), 
      seed = 9)

summary(model.similarity.brms, waic = TRUE)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: percent.chosen ~ recent.similarity * condType + (1 | dyad) + (1 | PID) 
##    Data: friend.rival (Number of observations: 9500) 
##   Draws: 3 chains, each with iter = 4000; warmup = 1500; thin = 1;
##          total post-warmup draws = 7500
## 
## Group-Level Effects: 
## ~dyad (Number of levels: 21) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.03      0.01     0.02     0.05 1.00     2439     4509
## 
## ~PID (Number of levels: 57) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.00      0.00     0.00     0.01 1.00     5898     3643
## 
## Population-Level Effects: 
##                                 Estimate Est.Error l-95% CI u-95% CI Rhat
## Intercept                           0.43      0.01     0.40     0.45 1.00
## recent.similarity                   0.23      0.03     0.17     0.29 1.00
## condTypeRival                       0.11      0.01     0.09     0.14 1.00
## recent.similarity:condTypeRival    -0.36      0.04    -0.44    -0.28 1.00
##                                 Bulk_ESS Tail_ESS
## Intercept                           3594     5329
## recent.similarity                   5905     6005
## condTypeRival                       5170     5681
## recent.similarity:condTypeRival     5145     5276
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma     0.30      0.00     0.30     0.30 1.00    11571     5760
## 
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# posterior conditional-effects plots (quick visual check of each predictor)
conditional_effects(model.similarity.brms)

# pairwise posterior scatter plots to inspect sampling pathologies
pairs(model.similarity.brms)

# trace + density plots per parameter
plot(model.similarity.brms)

# histogram of effective-sample-size ratios across parameters
model.similarity.brms %>% 
  neff_ratio() %>% 
  mcmc_neff_hist(binwidth = .1) +
  yaxis_text()

#main effect model for comparison
model.similarity.brms.main <- brm(data = friend.rival,
      family = gaussian,
      percent.chosen ~ recent.similarity + condType + (1|dyad) + (1|PID), 
      iter = 4000, warmup = 1500, chains = 3, cores = 3,
      control = list(adapt_delta = .999, max_treedepth = 15), 
      seed = 9)

#model comparison via loo
# attach LOO criteria to both fits, then compare (best model listed first)
model.similarity.brms <- add_criterion(model.similarity.brms, "loo")
model.similarity.brms.main <- add_criterion(model.similarity.brms.main, "loo")

loo_compare(model.similarity.brms, model.similarity.brms.main) %>% 
  print(simplify = F)
##                            elpd_diff se_diff elpd_loo se_elpd_loo p_loo  
## model.similarity.brms          0.0       0.0 -2071.5     50.1        22.7
## model.similarity.brms.main   -37.1       8.5 -2108.6     49.8        21.7
##                            se_p_loo looic   se_looic
## model.similarity.brms          0.2   4143.0   100.3 
## model.similarity.brms.main     0.2   4217.3    99.6
# plot to save
# extract the brms conditional-effects data frame for similarity x condType
c_similarity <- conditional_effects(model.similarity.brms,
                    effects = "recent.similarity:condType")
c_similarity <- as.data.frame(c_similarity$`recent.similarity:condType`)

# posterior predicted lines/ribbons overlaid on per-dyad observed means
ggplot(c_similarity, aes(x = recent.similarity, y = estimate__, color = condType, fill = condType)) +
  geom_point(data = sim.friendRival,
             aes(x = mean.similarity, y = mean.TimeChosen, color = condType, fill = condType),
             alpha = 0.5, shape = 16) +
  geom_line(size = 1.5) +
  ylim(0,1) +
  geom_ribbon(alpha = my.alpha,
              aes(ymin = lower__, ymax = upper__),
              # NOTE(review): 0.5 is not a standard linetype code (0-6);
              # possibly meant 0 (no ribbon border) — confirm intent
              linetype = 0.5) +
  ylab("% time chosen") +
  xlab("dyadic-level semantic similarity") +
  theme_classic() + 
  scale_color_manual(values = fr.colors, name = "Group") +
  scale_fill_manual(values = fr.colors, name = "Group") +
  theme(panel.background = element_blank(),
        axis.title.x = element_text(vjust = -0.4, size = 16),
        axis.title.y = element_text(vjust = 1.5, size = 16), 
        axis.text = element_text(size = 12),
        legend.text = element_text(size = 10),
        legend.title = element_text(size = 12))

# keep the ggplot object in case it is needed for multi-panel figures
similarity.bayesplot <- last_plot()

# save as PDF and JPEG; device inferred from extension; path is machine-specific
ggsave(filename = "Study2-bayes-similarity.pdf",
       path = "/Volumes/GoogleDrive/My Drive/SANLab/Manuscripts/Survivor+Language/MarkdownFigures",
       width = 7, height = 5,
       units = c("in"))

ggsave(filename = "Study2-bayes-similarity.jpeg",
       path = "/Volumes/GoogleDrive/My Drive/SANLab/Manuscripts/Survivor+Language/MarkdownFigures",
       width = 7, height = 5,
       units = c("in"))

Sentiment

# only include friend and rival judgments
friend.rival <- subset(all_choices, BlockType != "Win")
# `sent` (per-group sentiment plotting means) is built earlier in the notebook
sent.friendRival <- subset(sent, BlockType != "Win")

# Bayesian analogue of the frequentist sentiment model (brms default priors)
model.sentiment.brms <- brm(data = friend.rival,
      family = gaussian,
      percent.chosen ~ BlockType * MC_recent.sentiment + 
                          (1|PID) + (1|Speaker) + (1|Recipient), 
      iter = 4000, warmup = 1500, chains = 3, cores = 3,
      control = list(adapt_delta = .999, max_treedepth = 15), 
      seed = 9)

summary(model.sentiment.brms, waic = TRUE)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: percent.chosen ~ BlockType * MC_recent.sentiment + (1 | PID) + (1 | Speaker) + (1 | Recipient) 
##    Data: friend.rival (Number of observations: 2250) 
##   Draws: 3 chains, each with iter = 4000; warmup = 1500; thin = 1;
##          total post-warmup draws = 7500
## 
## Group-Level Effects: 
## ~PID (Number of levels: 57) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.01      0.01     0.00     0.02 1.00     4494     3357
## 
## ~Recipient (Number of levels: 7) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.06      0.03     0.03     0.14 1.00     2136     2637
## 
## ~Speaker (Number of levels: 7) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.01      0.01     0.00     0.04 1.00     3013     2971
## 
## Population-Level Effects: 
##                                    Estimate Est.Error l-95% CI u-95% CI Rhat
## Intercept                              0.54      0.03     0.48     0.60 1.00
## BlockTypeRival                        -0.05      0.01    -0.08    -0.03 1.00
## MC_recent.sentiment                   -0.07      0.04    -0.15     0.01 1.00
## BlockTypeRival:MC_recent.sentiment     0.06      0.06    -0.05     0.17 1.00
##                                    Bulk_ESS Tail_ESS
## Intercept                              2252     2622
## BlockTypeRival                        13097     5346
## MC_recent.sentiment                    6509     5924
## BlockTypeRival:MC_recent.sentiment     6641     5734
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma     0.32      0.00     0.31     0.33 1.00    13031     5188
## 
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# posterior conditional-effects plots (quick visual check)
conditional_effects(model.sentiment.brms)

# pairwise posterior scatter plots
pairs(model.sentiment.brms)

# trace + density plots per parameter
plot(model.sentiment.brms)

# histogram of effective-sample-size ratios across parameters
model.sentiment.brms %>% 
  neff_ratio() %>% 
  mcmc_neff_hist(binwidth = .1) +
  yaxis_text()

#main effect model
# main-effects-only comparison model (same settings, no interaction)
model.sentiment.brms.main <- brm(data = friend.rival,
      family = gaussian,
      percent.chosen ~ BlockType + MC_recent.sentiment + 
                          (1|PID) + (1|Speaker) + (1|Recipient), 
      iter = 4000, warmup = 1500, chains = 3, cores = 3,
      control = list(adapt_delta = .999, max_treedepth = 15), 
      seed = 9)

summary(model.sentiment.brms.main, waic = TRUE)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: percent.chosen ~ BlockType + MC_recent.sentiment + (1 | PID) + (1 | Speaker) + (1 | Recipient) 
##    Data: friend.rival (Number of observations: 2250) 
##   Draws: 3 chains, each with iter = 4000; warmup = 1500; thin = 1;
##          total post-warmup draws = 7500
## 
## Group-Level Effects: 
## ~PID (Number of levels: 57) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.01      0.01     0.00     0.02 1.00     4348     3659
## 
## ~Recipient (Number of levels: 7) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.06      0.03     0.03     0.13 1.00     2490     2944
## 
## ~Speaker (Number of levels: 7) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.01      0.01     0.00     0.04 1.00     3403     3913
## 
## Population-Level Effects: 
##                     Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept               0.54      0.03     0.48     0.59 1.00     3050     3662
## BlockTypeRival         -0.05      0.01    -0.08    -0.03 1.00    15787     5047
## MC_recent.sentiment    -0.04      0.03    -0.10     0.02 1.00    13456     5310
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma     0.32      0.00     0.31     0.33 1.00    14021     5403
## 
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
#model comparison via loo
# attach LOO criteria to both fits, then compare (best model listed first)
model.sentiment.brms <- add_criterion(model.sentiment.brms, "loo")
model.sentiment.brms.main <- add_criterion(model.sentiment.brms.main, "loo")

loo_compare(model.sentiment.brms, model.sentiment.brms.main) %>% 
  print(simplify = F)
##                           elpd_diff se_diff elpd_loo se_elpd_loo p_loo 
## model.sentiment.brms.main    0.0       0.0  -639.7     22.8        13.4
## model.sentiment.brms        -0.2       1.1  -639.9     22.8        14.2
##                           se_p_loo looic  se_looic
## model.sentiment.brms.main    0.3   1279.4   45.6  
## model.sentiment.brms         0.3   1279.7   45.6
# plot to save
# extract the brms conditional-effects data frame for sentiment x BlockType
c_sentiment <- conditional_effects(model.sentiment.brms,
                    effects = "MC_recent.sentiment:BlockType")
c_sentiment <- as.data.frame(c_sentiment$`MC_recent.sentiment:BlockType`)

# posterior predicted lines/ribbons overlaid on per-group observed means
ggplot(c_sentiment, aes(x = MC_recent.sentiment, y = estimate__, color = BlockType, fill = BlockType)) +
  geom_point(data = sent.friendRival,
             aes(x = mean.MC_sentiment, y = mean.TimeChosen, color = BlockType, fill = BlockType),
             alpha = 0.5, shape = 16) +
  geom_line(size = 1.5) +
  ylim(0,1) +
  geom_ribbon(alpha = my.alpha,
              aes(ymin = lower__, ymax = upper__),
              # NOTE(review): 0.5 is not a standard linetype code (0-6);
              # possibly meant 0 (no ribbon border) — confirm intent
              linetype = 0.5) +
  ylab("% time chosen") +
  xlab("mean-centered sentiment") +
  theme_classic() + 
  scale_color_manual(values = fr.colors, name = "Group") +
  scale_fill_manual(values = fr.colors, name = "Group") +
  theme(panel.background = element_blank(),
        axis.title.x = element_text(vjust = -0.4, size = 16),
        axis.title.y = element_text(vjust = 1.5, size = 16), 
        axis.text = element_text(size = 12),
        legend.text = element_text(size = 10),
        legend.title = element_text(size = 12))

# keep the ggplot object in case it is needed for multi-panel figures
sentiment.bayesplot <- last_plot()

# save as PDF and JPEG; device inferred from extension; path is machine-specific
ggsave(filename = "Study2-bayes-sentiment.pdf",
       path = "/Volumes/GoogleDrive/My Drive/SANLab/Manuscripts/Survivor+Language/MarkdownFigures",
       width = 7, height = 5,
       units = c("in"))

ggsave(filename = "Study2-bayes-sentiment.jpeg",
       path = "/Volumes/GoogleDrive/My Drive/SANLab/Manuscripts/Survivor+Language/MarkdownFigures",
       width = 7, height = 5,
       units = c("in"))

Clout

# only include friend and rival judgments
friend.rival <- subset(all_choices, BlockType != "Win")
# `clout` (per-group plotting means) was computed in the clout plotting section above
clout.friendRival <- subset(clout, BlockType != "Win")

# Bayesian analogue of the frequentist clout model (brms default priors)
model.clout.brms <- brm(data = friend.rival,
      family = gaussian,
      percent.chosen ~ BlockType * MC_recent.clout + 
                          (1|PID) + (1|Speaker) + (1|Recipient), 
      iter = 4000, warmup = 1500, chains = 3, cores = 3,
      control = list(adapt_delta = .999, max_treedepth = 15), 
      seed = 9)

summary(model.clout.brms, waic = TRUE)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: percent.chosen ~ BlockType * MC_recent.clout + (1 | PID) + (1 | Speaker) + (1 | Recipient) 
##    Data: friend.rival (Number of observations: 2250) 
##   Draws: 3 chains, each with iter = 4000; warmup = 1500; thin = 1;
##          total post-warmup draws = 7500
## 
## Group-Level Effects: 
## ~PID (Number of levels: 57) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.01      0.01     0.00     0.02 1.00     3960     3206
## 
## ~Recipient (Number of levels: 7) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.06      0.02     0.03     0.12 1.00     2682     3797
## 
## ~Speaker (Number of levels: 7) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.02      0.01     0.00     0.05 1.00     2402     2847
## 
## Population-Level Effects: 
##                                Estimate Est.Error l-95% CI u-95% CI Rhat
## Intercept                          0.54      0.03     0.48     0.59 1.00
## BlockTypeRival                    -0.05      0.01    -0.08    -0.03 1.00
## MC_recent.clout                    0.00      0.00     0.00     0.00 1.00
## BlockTypeRival:MC_recent.clout    -0.00      0.00    -0.01    -0.00 1.00
##                                Bulk_ESS Tail_ESS
## Intercept                          2935     3694
## BlockTypeRival                     9083     5841
## MC_recent.clout                    7252     6329
## BlockTypeRival:MC_recent.clout     7223     4873
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma     0.31      0.00     0.31     0.32 1.00    10901     5636
## 
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
conditional_effects(model.clout.brms)

pairs(model.clout.brms)

plot(model.clout.brms)

model.clout.brms %>% 
  neff_ratio() %>% 
  mcmc_neff_hist(binwidth = .1) +
  yaxis_text()

# Main-effects-only model (drops the BlockType x clout interaction),
# fit on the same data so it can be compared to the interaction
# model via LOO below. Sampler settings match the full model.
model.clout.brms.main <- brm(
  percent.chosen ~ BlockType + MC_recent.clout +
    (1 | PID) + (1 | Speaker) + (1 | Recipient),
  data = friend.rival,
  family = gaussian,
  chains = 3, cores = 3,
  iter = 4000, warmup = 1500,
  control = list(adapt_delta = .999, max_treedepth = 15),
  seed = 9
)

summary(model.clout.brms.main, waic = TRUE)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: percent.chosen ~ BlockType + MC_recent.clout + (1 | PID) + (1 | Speaker) + (1 | Recipient) 
##    Data: friend.rival (Number of observations: 2250) 
##   Draws: 3 chains, each with iter = 4000; warmup = 1500; thin = 1;
##          total post-warmup draws = 7500
## 
## Group-Level Effects: 
## ~PID (Number of levels: 57) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.01      0.01     0.00     0.02 1.00     3958     2935
## 
## ~Recipient (Number of levels: 7) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.06      0.03     0.03     0.13 1.00     2286     3747
## 
## ~Speaker (Number of levels: 7) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.01      0.01     0.00     0.04 1.00     2879     2945
## 
## Population-Level Effects: 
##                 Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept           0.54      0.03     0.48     0.59 1.00     2707     3951
## BlockTypeRival     -0.05      0.01    -0.08    -0.03 1.00    11309     5519
## MC_recent.clout     0.00      0.00    -0.00     0.00 1.00     8131     5577
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma     0.32      0.00     0.31     0.33 1.00    11368     5365
## 
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Model comparison via PSIS-LOO cross-validation.
# add_criterion() computes and caches the loo estimates on each fit.
model.clout.brms <- add_criterion(model.clout.brms, "loo")
model.clout.brms.main <- add_criterion(model.clout.brms.main, "loo")

# elpd_diff of 0 marks the preferred model; simplify = FALSE prints
# the full table (elpd_loo, p_loo, looic and their SEs).
print(loo_compare(model.clout.brms, model.clout.brms.main),
      simplify = FALSE)
##                       elpd_diff se_diff elpd_loo se_elpd_loo p_loo  se_p_loo
## model.clout.brms         0.0       0.0  -599.5     23.2        14.5    0.3  
## model.clout.brms.main  -40.4       8.6  -639.8     22.8        13.3    0.3  
##                       looic  se_looic
## model.clout.brms      1198.9   46.4  
## model.clout.brms.main 1279.7   45.5
# Plot model-predicted % time chosen across mean-centered clout,
# split by Friend/Rival block, with observed per-dyad means overlaid.
c_clout <- conditional_effects(model.clout.brms,
                    effects = "MC_recent.clout:BlockType")
c_clout <- as.data.frame(c_clout$`MC_recent.clout:BlockType`)

ggplot(c_clout, aes(x = MC_recent.clout, y = estimate__, color = BlockType, fill = BlockType)) +
  # raw dyad means (clout.friendRival computed earlier in the notebook)
  geom_point(data = clout.friendRival,
             aes(x = mean.MC_clout, y = mean.TimeChosen, color = BlockType, fill = BlockType),
             alpha = 0.5, shape = 16) +
  geom_line(size = 1.5) +
  ylim(0, 1) +
  # 95% credible band; linetype = 0 suppresses the ribbon outline
  # (the original passed 0.5, which is not a valid line type —
  # lty must be an integer code or a name such as "dashed")
  geom_ribbon(alpha = my.alpha,
              aes(ymin = lower__, ymax = upper__),
              linetype = 0) +
  ylab("% time chosen") +
  xlab("mean-centered clout") +
  theme_classic() +
  scale_color_manual(values = fr.colors, name = "Group") +
  scale_fill_manual(values = fr.colors, name = "Group") +
  theme(panel.background = element_blank(),
        axis.title.x = element_text(vjust = -0.4, size = 16),
        axis.title.y = element_text(vjust = 1.5, size = 16),
        axis.text = element_text(size = 12),
        legend.text = element_text(size = 10),
        legend.title = element_text(size = 12))

# keep a handle on this plot for the combined patchwork figure later
clout.bayesplot <- last_plot()

# ggsave() defaults to last_plot(); write PDF and JPEG copies
ggsave(filename = "Study2-bayes-clout.pdf",
       path = "/Volumes/GoogleDrive/My Drive/SANLab/Manuscripts/Survivor+Language/MarkdownFigures",
       width = 7, height = 5,
       units = "in")

ggsave(filename = "Study2-bayes-clout.jpeg",
       path = "/Volumes/GoogleDrive/My Drive/SANLab/Manuscripts/Survivor+Language/MarkdownFigures",
       width = 7, height = 5,
       units = "in")

Plot all three!

# Combine the three feature plots (similarity, sentiment, clout)
# into a single row and save the composite figure.
patchwork <- (similarity.bayesplot + sentiment.bayesplot + clout.bayesplot)

# plot_layout()/plot_annotation() act on the patchwork as a whole and are
# added with `+`; `&` recursively applies ggplot elements (here, the tag
# theme) to every subplot. The original used `&` for plot_annotation(),
# which is the wrong operator for a patchwork-level annotation.
patchwork +
  plot_layout(guides = "collect") +
  plot_annotation(tag_levels = "A") &
  theme(plot.tag = element_text(size = 14, face = "bold"))

ggsave(filename = "Study2-Bayes-all-features.pdf",
       path = "/Volumes/GoogleDrive/My Drive/SANLab/Manuscripts/Survivor+Language/MarkdownFigures",
       width = 15, height = 5,
       units = "in")

ggsave(filename = "Study2-Bayes-all-features.jpeg",
       path = "/Volumes/GoogleDrive/My Drive/SANLab/Manuscripts/Survivor+Language/MarkdownFigures",
       width = 15, height = 5,
       units = "in")